Windows uses regions 4 and 5 for identity mapping.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, u64 pte, u64 itir, u64 ifa)
{
- u64 gpfn;
#ifdef VTLB_DEBUG
int slot;
u64 ps, va;
}
#endif //VTLB_DEBUG
pte &= ~PAGE_FLAGS_RV_MASK;
- gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
- if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
- pte |= VTLB_PTE_IO;
thash_purge_and_insert(vcpu, pte, itir, ifa, DSIDE_TLB);
return IA64_NO_FAULT;
physical_tlb_miss(v, vadr, type);
return IA64_FAULT;
}
-
+
+try_again:
if((data=vtlb_lookup(v, vadr,type))!=0){
if (v->domain != dom0 && type == DSIDE_TLB) {
gppa = (vadr & ((1UL << data->ps) - 1)) +
if (misr.sp)
return vmx_handle_lds(regs);
+ vcpu_get_rr(v, vadr, &rr);
+ itir = rr & (RR_RID_MASK | RR_PS_MASK);
+
if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
+ if (GOS_WINDOWS(v)) {
+ /* Windows uses regions 4 and 5 for identity mapping */
+ if (REGION_NUMBER(vadr) == 4 && !(regs->cr_ipsr & IA64_PSR_CPL)
+ && (REGION_OFFSET(vadr)<= _PAGE_PPN_MASK)) {
+
+ pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) |
+ (_PAGE_P | _PAGE_A | _PAGE_D |
+ _PAGE_MA_WB | _PAGE_AR_RW);
+
+ if (thash_purge_and_insert(v, pteval, itir, vadr, type))
+ goto try_again;
+
+ return IA64_NO_FAULT;
+ }
+
+ if (REGION_NUMBER(vadr) == 5 && !(regs->cr_ipsr & IA64_PSR_CPL)
+ && (REGION_OFFSET(vadr)<= _PAGE_PPN_MASK)) {
+
+ pteval = PAGEALIGN(REGION_OFFSET(vadr),itir_ps(itir)) |
+ (_PAGE_P | _PAGE_A | _PAGE_D |
+ _PAGE_MA_UC | _PAGE_AR_RW);
+
+ if (thash_purge_and_insert(v, pteval, itir, vadr, type))
+ goto try_again;
+
+ return IA64_NO_FAULT;
+ }
+ }
+
if(vpsr.ic){
vcpu_set_isr(v, misr.val);
alt_dtlb(v, vadr);
}
/* avoid recursively walking (short format) VHPT */
- if ((((vadr ^ vpta.val) << 3) >> (vpta.size + 3)) == 0) {
+ if (!GOS_WINDOWS(v) &&
+ (((vadr ^ vpta.val) << 3) >> (vpta.size + 3)) == 0) {
+
if (vpsr.ic) {
vcpu_set_isr(v, misr.val);
dtlb_fault(v, vadr);
return IA64_FAULT;
}
} else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
- vcpu_get_rr(v, vadr, &rr);
- itir = rr & (RR_RID_MASK | RR_PS_MASK);
thash_purge_and_insert(v, pteval, itir, vadr, DSIDE_TLB);
return IA64_NO_FAULT;
} else if (vpsr.ic) {
data = vhpt_lookup(iha);
if (data == NULL) {
- data = vtlb_lookup(current, iha, DSIDE_TLB);
+ data = __vtr_lookup(current, iha, DSIDE_TLB);
if (data != NULL)
thash_vhpt_insert(current, data->page_flags, data->itir,
iha, DSIDE_TLB);
/*
* Purge overlap TCs and then insert the new entry to emulate itc ops.
* Notes: Only TC entry can purge and insert.
+ * Returns 1 if the inserted mapping is MMIO, 0 otherwise.
*/
-void thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa, int type)
+int thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa, int type)
{
u64 ps;//, va;
u64 phy_pte;
ia64_rr vrr, mrr;
+ int ret = 0;
ps = itir_ps(itir);
vcpu_get_rr(current, ifa, &vrr.rrval);
mrr.rrval = ia64_get_rr(ifa);
if(VMX_DOMAIN(v)){
+
+ phy_pte = translate_phy_pte(v, &pte, itir, ifa);
+
/* Ensure WB attribute if pte is related to a normal mem page,
* which is required by vga acceleration since qemu maps shared
* vram buffer with WB.
*/
- if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT))
+ if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
pte &= ~_PAGE_MA_MASK;
-
- phy_pte = translate_phy_pte(v, &pte, itir, ifa);
+ phy_pte &= ~_PAGE_MA_MASK;
+ }
+ if (pte & VTLB_PTE_IO)
+ ret = 1;
vtlb_purge(v, ifa, ps);
vhpt_purge(v, ifa, ps);
if (ps == mrr.ps) {
machine_tlb_purge(ifa, ps);
vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
}
+ return ret;
}
/*
*/
extern void thash_purge_entries(struct vcpu *v, u64 va, u64 ps);
extern void thash_purge_entries_remote(struct vcpu *v, u64 va, u64 ps);
-extern void thash_purge_and_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa, int type);
+extern int thash_purge_and_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa, int type);
/*
* Purge all TCs or VHPT entries including those in Hash table.
#define OS_LINUX 0xB2
#define OS_END 0xB3
+#define GOS_WINDOWS(_v) \
+ ((_v)->domain->arch.vmx_platform.gos_type == OS_WINDOWS)
+
+#define GOS_LINUX(_v) \
+ ((_v)->domain->arch.vmx_platform.gos_type == OS_LINUX)
+
/* port guest Firmware use to indicate os type
* this port is used to trigger SMI on x86,
* it is not used on ia64 */